ADHD Adults Diagnosis

ADHD Adults Diagnosis

library(dplyr)

Attaching package: 'dplyr'
The following objects are masked from 'package:stats':

    filter, lag
The following objects are masked from 'package:base':

    intersect, setdiff, setequal, union
library(ggplot2)
Warning: package 'ggplot2' was built under R version 4.5.2
library(ggrepel)
Warning: package 'ggrepel' was built under R version 4.5.2
library(metafor)
Loading required package: Matrix
Warning: package 'Matrix' was built under R version 4.5.2
Loading required package: metadat
Loading required package: numDeriv

Loading the 'metafor' package (version 4.8-0). For an
introduction to the package please type: help(metafor)
library(stringr)
Warning: package 'stringr' was built under R version 4.5.2
library(tidyr)
Warning: package 'tidyr' was built under R version 4.5.2

Attaching package: 'tidyr'
The following objects are masked from 'package:Matrix':

    expand, pack, unpack
library(writexl)
Warning: package 'writexl' was built under R version 4.5.2
# Source data: one row per study, read straight from the project repository.
data_location <- "https://raw.githubusercontent.com/jeremymiles/adhd_git/refs/heads/main/data/diagnosis_data.csv"

# Read, then normalise the one column whose name uses a dot instead of an
# underscore so it matches the rest of the *_self_report columns.
d <- read.csv(data_location) %>%
  dplyr::rename(true_negative_self_report = true_negative_self.report)

# Capitalize the first letter of each element of a character vector,
# leaving the rest of each string untouched. NA and empty strings pass
# through unchanged.
#
# Improvements over the original:
#  - vectorized: the original used scalar `||`, which errors on inputs of
#    length > 1 (R >= 4.3), even though the function is applied
#    element-wise via sapply() elsewhere in this file;
#  - base R only, so it no longer depends on stringr being attached.
# Scalar calls behave exactly as before.
capitalize_first <- function(s) {
  ifelse(
    is.na(s) | s == "",     # pass NA / "" through unchanged
    s,
    paste0(toupper(substr(s, 1, 1)), substring(s, 2))
  )
}

Options can be added to executable code chunks like this:

#| fig-height: 12
# Build the sensitivity/specificity analysis set:
#  1. keep ID, sample size, and all sensitivity_* / specificity_* columns;
#  2. keep only the recognised test-modality suffixes;
#  3. drop the CI and "other" variants, which are not analysed here.
# (test_description_self is selected in step 1 but carries none of the
# modality suffixes, so step 2 drops it again.)
d_ss <- d %>% 
  dplyr::select(
    ID, size, starts_with("sensitivity"), starts_with("specificity"),
    test_description_self
  ) %>%
  dplyr::select(
    ID, size,
    # "neuroimaging" was listed twice in the original; the duplicate was
    # harmless to ends_with() but redundant, so it is removed here.
    ends_with(
      c("self_report", "neuropsycho_tests", "clinician_interview",
        "combination", "clinician_tool", "clinician_rating",
        "peer_rating", "neuroimaging",
        "observational", "biomarker", "EEG")
    )
  ) %>%
  dplyr::select(
    !starts_with(c(
      "sensitivity_CI", "specificity_CI",
      "sensitivity_other", "specificity_other"))
  )

# Lookup table: each study's self-report test description, one row per
# distinct (ID, description) pair.
d_test_description_self <- d %>%
  dplyr::distinct(ID, test_description_self)

# Split the wide table into a sensitivity-only and a specificity-only half.
d_sens <- dplyr::select(d_ss, ID, starts_with("sens"))

d_spec <- dplyr::select(d_ss, ID, starts_with("spec"))

# Reshape sensitivity columns to long form: one row per study x modality.
# pivot_longer()'s names_prefix / values_to arguments replace the original's
# post-hoc rename() + str_remove() steps; the result is identical
# (ID, name, sensitivity).
d_sens_long <- d_sens %>%
  tidyr::pivot_longer(
    cols = -ID,
    names_prefix = "sensitivity_",
    values_to = "sensitivity"
  )

# Same reshape for specificity (ID, name, specificity).
d_spec_long <- d_spec %>%
  tidyr::pivot_longer(
    cols = -ID,
    names_prefix = "specificity_",
    values_to = "specificity"
  )


# Join sensitivity and specificity into one long table (one row per
# study x modality), keep rows where both are reported, and turn the raw
# variable suffixes into the display labels used in the figures.
d_ss_long <- dplyr::full_join(
  d_sens_long, d_spec_long,
  # Explicit keys; these match the join_by(ID, name) message the implicit
  # join used to emit.
  by = c("ID", "name")
) %>%
  dplyr::filter(
    !is.na(sensitivity) & !is.na(specificity)
  ) %>%
  dplyr::mutate(
    # capitalize_first is applied element-wise (it is scalar-oriented).
    name = sapply(name, capitalize_first),
    name = stringr::str_replace_all(name, "_", " "),
    # Map the suffix-derived labels to display names; case_when replaces
    # the original's repeated ifelse() chain (and its trailing comma).
    name = dplyr::case_when(
      name == "Neuropsycho tests" ~ "Neuropsychological Test",
      name == "Self report" ~ "Self-Report Questionnaire",
      name == "Peer rating" ~ "Peer-Rating",
      name == "Clinician interview" ~ "Clinician Tool",
      .default = name
    )
  )
Joining with `by = join_by(ID, name)`
# Mark studies whose sample includes a neurotypical comparison group.
d_neurotypical <- d %>%
  dplyr::select(ID, Neurotypical, size) %>%
  dplyr::mutate(Neurotypical = Neurotypical == "Neurotypical")

# Mark studies whose comparison sample is *exclusively* neurotypical:
# no clinical, autism, antisocial, depression, or feigning groups.
d_neurotypical_Only <- d %>%
  dplyr::mutate(
    Neurotypical_Only =
      Neurotypical == "Neurotypical" &
      Clinical != "Clinical" &
      Autism != "Autism" &
      Antisocial != "Antisocial" &
      Depression != "Depression" &
      Feigning != "Feigning"
  ) %>%
  dplyr::select(ID, Neurotypical_Only, size)


# Count the number of studies of each test modality.
# Built as a single data.frame so `count` stays numeric: the original
# bind_rows() of named character vectors coerced everything, including
# the counts, to character.
d_name_count <- data.frame(
  name = c(
    "Combination", "Biomarker", "Clinician Tool", "EEG", "Neuroimaging",
    "Neuropsychological Test", "Peer-Rating", "Self-Report Questionnaire"
  ),
  count = c(
    sum(d$Combination == "Combination"),
    sum(d$Biomarker == "Biomarker"),
    sum(d$Clinician.interview == "Clinician interview"),
    sum(d$EEG == "EEG"),
    sum(d$Neuroimaging == "Neuroimaging"),
    sum(d$Neuropsychological == "Neuropsychological"),
    sum(d$Peer.report == "Peer report"),
    sum(d$Self.report == "Self report")
  )
)

# Merge the group flags into the long data. Join keys are explicit; they
# match the join_by() messages the implicit joins used to print
# (d_neurotypical joins on ID; d_neurotypical_Only also shares size).
d_ss_long <- d_ss_long %>%
  dplyr::full_join(d_neurotypical, by = "ID") %>%
  dplyr::full_join(d_neurotypical_Only, by = c("ID", "size")) %>%
  dplyr::filter(!is.na(name))
Joining with `by = join_by(ID)`
Joining with `by = join_by(ID, size)`
# Quick overall scatter of sensitivity against specificity.
ggplot2::ggplot(d_ss_long, aes(x = sensitivity, y = specificity)) +
  geom_point() +
  labs(x = "Sensitivity (%)", y = "Specificity (%)")

# Attach the per-modality study counts and build facet labels like
# "Self-Report Questionnaire (n = 12 studies)".
d_ss_long <- d_ss_long %>%
  dplyr::full_join(d_name_count, by = "name") %>%
  dplyr::mutate(
    name_with_count = paste0(name, " (n = ", count, " studies)")
  )

# Faceted by modality (with study counts); colour encodes modality,
# shape marks samples that include neurotypicals.
ggplot2::ggplot(
  d_ss_long,
  aes(x = sensitivity, y = specificity,
      colour = name, shape = Neurotypical)
) +
  geom_point(size = 2.5) +
  labs(x = "Sensitivity (%)", y = "Specificity (%)") +
  facet_wrap(~ name_with_count) +
  guides(colour = "none") +
  theme(legend.position = "bottom")

# Figure 8: as above, but the shape marks exclusively-neurotypical
# samples, with two facet rows and no (%) in the axis titles.
ggplot2::ggplot(
  d_ss_long,
  aes(x = sensitivity, y = specificity,
      colour = name, shape = Neurotypical_Only)
) +
  geom_point(size = 2.5) +
  labs(
    x = "Sensitivity",
    y = "Specificity",
    shape = "Neurotypical only"
  ) +
  facet_wrap(~ name, nrow = 2) +
  guides(colour = "none")

# One chart per test modality.
# Iterate over unique() names: the original looped over every element of
# d_ss_long$name (one per data row), re-drawing each modality's chart
# once per row.
for (name_1 in unique(d_ss_long$name)) {
  plot <- ggplot2::ggplot(
    d_ss_long %>% dplyr::filter(name == name_1),
    aes(
      x = sensitivity,
      y = specificity,
      shape = Neurotypical_Only)) +
    geom_point(size = 2.5) +
    xlab("Sensitivity") +
    ylab("Specificity") +
    guides(colour = "none") +
    labs(
      shape = "Neurotypical only"
    ) +
    ggtitle(name_1)
  print(plot)
}

# Figure 8 separate charts

Self report only

# This is figure 5: self-report questionnaires.
cat("self report<p>")
self report<p>
# 1. Prepare the self-report data.
# The filter must use the display label created earlier
# ("Self-Report Questionnaire"); the original filtered on "Self-Report",
# which matches no rows — hence the empty figure and the "No shared
# levels" scale warning downstream.
plot_data <- d_ss_long %>%
  dplyr::filter(
    name == "Self-Report Questionnaire"
  ) %>%
  dplyr::inner_join(
    d_test_description_self,
    by = "ID"  # explicit key, matching the former join message
  ) %>%
  dplyr::mutate(
    # First word of the description, e.g. "CAARS"; the original's extra
    # `size = size` was a no-op and is dropped.
    test_description = word(test_description_self, 1)
  )
Joining with `by = join_by(ID)`
# Bucket the most common questionnaires by their name prefix; anything
# unrecognised falls into "Other".
plot_data <- plot_data %>%
  dplyr::mutate(
    `Frequent Tools` = dplyr::case_when(
      substr(test_description, 1, 5) == "CAARS" ~ "CAARS",
      substr(test_description, 1, 5) == "BAARS" ~ "BAARS",
      substr(test_description, 1, 4) == "ASRS" ~ "ASRS",
      substr(test_description, 1, 4) == "WURS" ~ "WURS",
      .default = "Other"
    )
  )

# Tabulate how often each specific questionnaire appears, together with
# its Frequent-Tools bucket, most common first.
plot_data %>%
  dplyr::group_by(test_description, `Frequent Tools`) %>%
  dplyr::summarise(n = dplyr::n()) %>%
  dplyr::arrange(desc(n)) %>%
  knitr::kable()
`summarise()` has grouped output by 'test_description'. You can override using
the `.groups` argument.
test_description Frequent Tools n
# 2. Inspect your data (optional, but highly recommended for debugging)
# print(str(plot_data))
# print(summary(plot_data$size))
# print(sum(is.na(plot_data$size)))
# print(range(plot_data$size, na.rm = TRUE))

# 1. Named palette for the Frequent Tools levels.
# Every level needs an entry — scale_color_manual() drops (or greys out)
# any level without a matching colour.
my_colors <- c(
  "ASRS"  = "red",
  "CAARS" = "blue",
  "BAARS" = "darkgreen",
  "WURS"  = "brown4",
  "Other" = "black"
)

# 2. Plot sensitivity vs specificity for self-report tools: points sized
# by study sample size, coloured by questionnaire family, labelled with
# the tool name.
ggplot2::ggplot(
  data = plot_data,
  aes(
    x = sensitivity,
    y = specificity,
    shape = Neurotypical_Only,
    size = size,
    color = `Frequent Tools`
  )
) +
  geom_point() +
  # 3. Manual palette so tool families keep fixed colours.
  scale_color_manual(values = my_colors) +
  ggrepel::geom_text_repel(
    aes(label = test_description),
    max.overlaps = 10, size = 3
  ) +
  labs(
    x = "Sensitivity (%)",
    y = "Specificity (%)",
    shape = "Neurotypical only"
  ) +
  scale_size_continuous(
    name = "Size",                   # legend title
    breaks = c(50, 500, 1000),       # untransformed sizes shown in legend
    labels = c("50", "500", "1000"),
    guide = "legend"                 # explicitly request a legend
  ) +
  theme(legend.position = "right")
Warning: No shared levels found between `names(values)` of the manual scale and the
data's colour values.

# One row per study: first word of the neuropsychological test description.
d_test_description_np <- d %>%
  dplyr::distinct(ID, test_description_neuropsycho_tests, size) %>%
  dplyr::mutate(
    test_description = word(test_description_neuropsycho_tests, 1)
  )

# Frequency table of neuropsychological tests, most common first.
d_test_description_np %>%
  dplyr::count(test_description, sort = TRUE) %>%
  knitr::kable()
test_description n
99
Model 8
QbTest 4
AQT 2
Battery 2
C-CPT-II 2
Go/NoGo 2
MOXO-dCPT 2
BQSS 1
C-CPT 1
C-CPT-3 1
DS 1
IVA+Plus-FSRCQ 1
Measures 1
QbTest-Plus 1
SCWT 1
Stroop 1
TOAD 1
TOVA 1
rm(plot_data)

# This is figure 6: neuropsychological tests.
cat("Neuropsychological \n Tests<p>")
Neuropsychological 
 Tests<p>
# Prepare the neuropsychological-test data for figure 6.
# The filter must use the display label created earlier
# ("Neuropsychological Test"); the original filtered on
# "Neuropsychological \n Tests", which matches no rows — hence the
# "No shared levels" scale warning downstream.
plot_data <- d_ss_long %>%
  dplyr::filter(
    name == "Neuropsychological Test"
  ) %>%
  dplyr::inner_join(
    d_test_description_np,
    by = c("ID", "size")  # explicit keys, matching the former join message
  ) %>%
  dplyr::mutate(test_description = word(test_description_neuropsycho_tests, 1))
Joining with `by = join_by(ID, size)`
# Bucket the frequently used neuropsychological tools by name prefix.
plot_data <- plot_data %>%
  dplyr::mutate(
    `Frequent Tools` = dplyr::case_when(
      substr(test_description, 1, 3) == "AQT" ~ "AQT",
      substr(test_description, 1, 5) == "C-CPT" ~ "C-CPT",
      substr(test_description, 1, 4) == "MOXO" ~ "MOXO",
      substr(test_description, 1, 6) == "QbTest" ~ "QbTest",
      # The data spells this "Go/NoGo" (see the frequency table above);
      # the original matched only "Go-No", so those studies were
      # mislabelled "Other". Accept both spellings.
      substr(test_description, 1, 5) %in% c("Go-No", "Go/No") ~ "Go-NoGo",
      .default = "Other"
    )
  )

# Named palette: every Frequent Tools level needs an entry for
# scale_color_manual().
my_colors <- c(
  "AQT" = "red",
  "C-CPT" = "blue",
  "MOXO" = "darkgreen",
  "QbTest" = "chocolate4",
  "Go-NoGo" = "yellow",
  "Other" = "black"
)
  
  # Figure 6 scatter: neuropsychological tests, points sized by study
  # sample and coloured by tool family.
  ggplot2::ggplot(
    plot_data,
    aes(
      x = sensitivity,
      y = specificity,
      shape = Neurotypical_Only,
      size = size,
      color = `Frequent Tools`
    )
  ) +
    geom_point() +
    scale_color_manual(values = my_colors) +
    geom_text_repel(aes(label = test_description), max.overlaps = 10, size = 3) +
    labs(
      x = "Sensitivity (%)",
      y = "Specificity (%)",
      shape = "Neurotypical only"
    ) +
    theme(legend.position = "right")
Warning: No shared levels found between `names(values)` of the manual scale and the
data's colour values.

# Names of the AUC outcome columns, excluding their CI columns.
AUC_vars <- d %>%
  dplyr::select(starts_with("AUC"), -starts_with("AUC_CI")) %>%
  names()
AUC_vars
 [1] "AUC_self_report"         "AUC_peer_rating"        
 [3] "AUC_neuropsycho_tests"   "AUC_neuroimaging"       
 [5] "AUC_clinician_interview" "AUC_feigningADHD"       
 [7] "AUC_observational"       "AUC_combination"        
 [9] "AUC_DMV"                 "AUC_clinician_tool"     
[11] "AUC_biomarker"           "AUC_general_cognitive"  
[13] "AUC_EEG"                
# Classify each study's comparison sample:
#  - "Neurotypical": only neurotypical controls; no clinical, autism,
#    antisocial, depression, or feigning groups.
#  - "Clinical": at least one clinical-type group, with no neurotypical
#    controls and no feigners.
#  - "Both (or more)": neurotypical controls plus at least one clinical
#    or feigning group.
# Branch order matters: case_when() takes the first matching condition.
d <- d %>%
  dplyr::mutate(
    group = dplyr::case_when(
      Neurotypical == "Neurotypical" &
        Clinical != "Clinical" &
        Autism != "Autism" &
        Antisocial != "Antisocial" &
        Depression != "Depression" &
        Feigning != "Feigning" ~ "Neurotypical",
      (Clinical == "Clinical" |
         Autism == "Autism" |
         Antisocial == "Antisocial" |
         Depression == "Depression") &
        Neurotypical != "Neurotypical" &
        Feigning != "Feigning" ~ "Clinical",
      Neurotypical == "Neurotypical" & (
        Clinical == "Clinical" |
          Autism == "Autism" |
          Antisocial == "Antisocial" |
          Depression == "Depression" |
          Feigning == "Feigning" # Include Feigning here if needed for "Both"
      ) ~ "Both (or more)",
      TRUE ~ NA_character_ # anything else (e.g. feigning-only samples)
    )
  ) 



# Long-format accuracy data: one row per study x test modality, restricted
# to studies whose comparison group was classified above. The accuracy_*
# columns are listed explicitly (not via starts_with()) — NOTE(review):
# presumably to exclude other accuracy_* variants; confirm before widening.
d_long_accuracy <- d %>%
  dplyr::select(
    accuracy_biomarker,
    accuracy_combination,
    accuracy_EEG,
    accuracy_neuroimaging,
    accuracy_neuropsycho_tests,
    accuracy_observational,
    accuracy_peer_rating,
    accuracy_self_report,
    group
  ) %>% 
  dplyr::filter(!is.na(group)) %>%
  pivot_longer(
    cols = starts_with("accuracy_"),
    names_to = "accuracy_type",
    values_to = "accuracy_value",
    names_prefix = "accuracy_"
  )%>% 
  dplyr::mutate(measure = "accuracy") %>%
  # Rename accuracy_type/accuracy_value to type/value to align with the
  # AUC long data they are later bound to.
  dplyr::rename_with(~ gsub("accuracy_", "", .x), starts_with("accuracy"))




# Long-format AUC data: one row per study x test modality.
d_long_AUC <- d %>%
  dplyr::select(
    starts_with("AUC"),
    group
  ) %>% 
  dplyr::mutate(
    # Stored as character in the CSV; the "NAs introduced by coercion"
    # warning is expected for non-numeric entries.
    AUC_neuropsycho_tests = as.numeric(AUC_neuropsycho_tests)
  ) %>% 
  dplyr::select(-starts_with("AUC_CI")) %>% 
  dplyr::filter(!is.na(group)) %>%
  pivot_longer(
    cols = starts_with("AUC_"),
    names_to = "AUC_type",
    values_to = "AUC_value",
    names_prefix = "AUC_"
  ) %>% 
  dplyr::mutate(
    # Harmonise scale: AUCs reported as proportions (< 1) become percentages.
    AUC_value = ifelse(
      AUC_value < 1, AUC_value * 100, AUC_value
    )
  ) %>% 
  dplyr::mutate(measure = "AUC") %>%
  # Rename AUC_type/AUC_value to type/value, matching the accuracy data.
  dplyr::rename_with(~ gsub("AUC_", "", .x), starts_with("AUC")) %>%
  dplyr::filter(!is.na(value)) %>% 
  # Feigning detection is excluded from this comparison.
  dplyr::filter(type != "feigningADHD")
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `AUC_neuropsycho_tests = as.numeric(AUC_neuropsycho_tests)`.
Caused by warning:
! NAs introduced by coercion
# Stack the accuracy and AUC long tables and drop missing values.
d_long_both <- dplyr::bind_rows(d_long_accuracy, d_long_AUC) %>%
  dplyr::filter(!is.na(value))

# Make labels nicer for plotting: map raw modality codes to display names
# (unmatched values pass through unchanged) and capitalise "accuracy".
d_long_both <- d_long_both %>%
  dplyr::mutate(
    type = dplyr::recode(
      type,
      biomarker = "Biomarker",
      clinician_interview = "Clinician\nTool",
      combination = "Combination",
      neuroimaging = "Neuroimaging",
      observational = "Observational",
      neuropsycho_tests = "Neuro-\npsychological",
      peer_rating = "Peer\nRating",
      self_report = "Self-Report"
    ),
    measure = ifelse(measure == "accuracy", "Accuracy", measure)
  )
  


# Boxplots of accuracy/AUC by comparison group: modality down the rows,
# measure across the columns.
d_long_both %>%
  dplyr::filter(!is.na(value)) %>%
  dplyr::rename(Group = group) %>%
  ggplot2::ggplot(aes(x = Group, y = value)) +
  geom_boxplot() +
  facet_grid(type ~ measure) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))

# Figure 7: same data transposed — measure down the rows, modality across.
d_long_both %>%
  dplyr::filter(!is.na(value)) %>%
  dplyr::rename(Group = group) %>%
  ggplot2::ggplot(aes(x = Group, y = value)) +
  geom_boxplot() +
  facet_grid(measure ~ type) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(y = NULL)

# Alternative view: one facet per modality, measures distinguished by colour.
d_long_both %>%
  dplyr::filter(!is.na(value)) %>%
  dplyr::rename(Group = group) %>%
  ggplot2::ggplot(aes(x = Group, y = value, color = measure)) +
  geom_boxplot() +
  facet_wrap(~ type) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))

Function to get Accuracy

library(dplyr)
library(kableExtra)

Attaching package: 'kableExtra'
The following object is masked from 'package:dplyr':

    group_rows
# Read the ADHD diagnosis CSV and keep only the columns needed for the
# self-report accuracy calculation: case counts, sensitivity/specificity,
# total sample size, and study ID.
read_adhd_data <- function(file_path) {
  read.csv(file_path) %>%
    select(
      n_ADHD,
      sensitivity_self_report,
      specificity_self_report,
      size, ID
    )
}

########################### Usage example
# Pull the self-report accuracy columns from the shared dataset URL.
accuracy <- read_adhd_data(data_location)
#############################

# Check the result
head(accuracy)
  n_ADHD sensitivity_self_report specificity_self_report size
1    518                      NA                      NA  845
2     38                      NA                      NA   69
3    142                      92                      NA  280
4   1006                      NA                      NA 1135
5     63                      89                      13   69
6     70                      81                      71  140
                    ID
1   Abramson, 2023{#5}
2     Adamou, 2022{#8}
3      Aita, 2018{#16}
4      Amen, 2021{#28}
5    Bakare, 2020{#45}
6 Bastiaens, 2017{#55}
# Add prevalence and prevalence-weighted accuracy columns to the
# self-report data.
# Accuracy = Sensitivity * Prevalence + Specificity * (1 - Prevalence),
# on the same percentage scale as the sensitivity/specificity inputs.
calculate_adhd_stats <- function(data) {
  # 1. Prevalence: proportion of the sample diagnosed with ADHD.
  data$Prevalence <- data$n_ADHD / data$size

  # 2. Its complement, used as the specificity weight.
  data$One_Minus_Prevalence <- 1 - data$Prevalence

  # 3. Cleaner-named copies for the formula (later code drops them).
  data$Sensitivity <- data$sensitivity_self_report
  data$Specificity <- data$specificity_self_report

  # 4. Prevalence-weighted accuracy.
  data$Accuracy <- data$Sensitivity * data$Prevalence +
    data$Specificity * data$One_Minus_Prevalence

  data
}
# Add the prevalence-weighted accuracy columns and preview the result.
accuracy <- calculate_adhd_stats(accuracy)
head(accuracy)
  n_ADHD sensitivity_self_report specificity_self_report size
1    518                      NA                      NA  845
2     38                      NA                      NA   69
3    142                      92                      NA  280
4   1006                      NA                      NA 1135
5     63                      89                      13   69
6     70                      81                      71  140
                    ID Prevalence One_Minus_Prevalence Sensitivity Specificity
1   Abramson, 2023{#5}  0.6130178           0.38698225          NA          NA
2     Adamou, 2022{#8}  0.5507246           0.44927536          NA          NA
3      Aita, 2018{#16}  0.5071429           0.49285714          92          NA
4      Amen, 2021{#28}  0.8863436           0.11365639          NA          NA
5    Bakare, 2020{#45}  0.9130435           0.08695652          89          13
6 Bastiaens, 2017{#55}  0.5000000           0.50000000          81          71
  Accuracy
1       NA
2       NA
3       NA
4       NA
5  82.3913
6  76.0000
#install.packages("kableExtra")

# Drop the helper columns before rendering the table.
accuracy <- accuracy %>%
  select(-Sensitivity, -Specificity)

# Output target for the rendered table.
output_file <- "accuracy.html"

# Render the accuracy table and write it to a standalone HTML file.
kableExtra::kbl(accuracy) %>%
  kable_styling(bootstrap_options = c("striped", "hover")) %>%
  save_kable(file = output_file)

Program Summary of Findings Table

# Test: combination
# Outcome: Clinical_misdiagnosis



# Build one row of the programmatic summary-of-findings table for a single
# "outcome_indextest" variable name (e.g. "sensitivity_EEG").
# Reads the study-level data frame `d` from the enclosing environment.
# Returns a one-row data.frame: the variable name, the contributing
# studies, and the min-to-max range of reported results (reversed,
# best-first, for sensitivity/specificity).
#
# Fix over the original: when no study reports the outcome (n == 0), the
# original called min()/max() on an empty vector, warning and emitting
# "Inf(#) to -Inf(#)" rows; this now returns an explicit "No data" row.
CreateProgSummary <- function(test_outcome_and_result) {

  # This variable is named with a dot in the source data.
  if (test_outcome_and_result == "cost_self_report") {
    test_outcome_and_result <- "cost_self.report"
  }

  # Sensitivity/specificity ranges are reported best-to-worst.
  reverse_min_max <-
    stringr::str_detect(test_outcome_and_result, "sensitivity") |
    stringr::str_detect(test_outcome_and_result, "specificity")

  print(test_outcome_and_result)

  # Known misnamed variables in the dataset: flag rather than summarise.
  if (test_outcome_and_result %in%
      c(
        "admin_self_report",
        "admin_neuropsycho_tests",
        "cost_neuropsycho_tests",
        "concordance_self_report",
        "concordance_neuropsycho_tests",
        "kappa_neuropsycho_tests",
        "kappa_neuroimaging")
      ) {
    return(
      data.frame(
        test_outcome_and_result = test_outcome_and_result,
        contributing_studies = "Misnamed variable (probably doesn't matter?)",
        primary_results  = "Misnamed variable"
        )
    )
  }

  non_numeric_question <- !is.numeric(d[[test_outcome_and_result]])

  # Coerce to numeric and keep only the studies reporting this outcome.
  d_now <-
    d %>%
    dplyr::mutate(!!rlang::sym(test_outcome_and_result) := as.numeric(!!rlang::sym(test_outcome_and_result))) %>%
    dplyr::filter(!is.na(!!rlang::sym(test_outcome_and_result)))

  n <- nrow(d_now)

  if (non_numeric_question) {
    contributing_studies <- primary_results <- "Warning: non-numeric result."
  } else if (n == 0) {
    # Guard against empty min()/max() (see header comment).
    contributing_studies <- "0 studies"
    primary_results <- "No data"
  } else {
    min_value <- min(d_now[[test_outcome_and_result]])
    max_value <- max(d_now[[test_outcome_and_result]])

    # Reference IDs of the studies reporting the extreme values.
    min_id <- d_now[which(d_now[[test_outcome_and_result]] == min_value), "Refid"] %>% paste0(., collapse = ", ")
    max_id <- d_now[which(d_now[[test_outcome_and_result]] == max_value), "Refid"] %>% paste0(., collapse = ", ")

    contributing_studies <- (glue::glue(
      "{paste(d_now$ID)}")
    ) %>% as.character() %>% paste(., collapse = "; ")

    contributing_studies <-
      glue::glue("{n} studies ({contributing_studies})")

    if (!reverse_min_max) {
      primary_results <-
        glue::glue(
          "{min_value}(#{min_id}) to {max_value}(#{max_id})"
          )
    } else {
      primary_results <-
        glue::glue(
          "{max_value}(#{max_id}) to {min_value}(#{min_id})"
          )
    }
  }

  return(
    data.frame(
      test_outcome_and_result = test_outcome_and_result,
      contributing_studies = contributing_studies,
      primary_results = primary_results))
}
                
  
# Outcomes extracted for each key question.
kq_outcome <- c(
  "clinical_misdiagnosis",
  "sensitivity",
  "specificity",
  "admin",
  "kappa",
  "ICC",
  "cost",
  "concordance"
)

# Index tests under review.
kq_index_test <- c(
  "combination",
  "self_report",
  "peer_rating",
  "neuropsycho_tests",
  "neuroimaging",
  "EEG",
  "biomarker",
  "observational",
  "clinician_interview",
  "clinician_tool",
  "feigningADHD"
)

# Every "outcome_indextest" combination, outcomes varying fastest within
# each test — the same order the original produced with expand_grid() +
# row-wise apply(). apply() on a data frame coerces it to a matrix
# (a known anti-pattern), so build the names directly instead.
all_kqs <- unlist(lapply(
  kq_index_test,
  function(test) paste(kq_outcome, test, sep = "_")
))



res <- lapply(all_kqs, CreateProgSummary)
[1] "clinical_misdiagnosis_combination"
[1] "sensitivity_combination"
[1] "specificity_combination"
[1] "admin_combination"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_combination = as.numeric(admin_combination)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_combination"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_combination"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_combination"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_combination = as.numeric(cost_combination)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_combination"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_combination = as.numeric(concordance_combination)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_self_report"
[1] "sensitivity_self_report"
[1] "specificity_self_report"
[1] "admin_self_report"
[1] "kappa_self_report"
[1] "ICC_self_report"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_self.report"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_self.report = as.numeric(cost_self.report)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_self_report"
[1] "clinical_misdiagnosis_peer_rating"
[1] "sensitivity_peer_rating"
[1] "specificity_peer_rating"
[1] "admin_peer_rating"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_peer_rating = as.numeric(admin_peer_rating)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_peer_rating"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_peer_rating"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_peer_rating"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_peer_rating = as.numeric(cost_peer_rating)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_peer_rating"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_peer_rating = as.numeric(concordance_peer_rating)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_neuropsycho_tests"
[1] "sensitivity_neuropsycho_tests"
[1] "specificity_neuropsycho_tests"
[1] "admin_neuropsycho_tests"
[1] "kappa_neuropsycho_tests"
[1] "ICC_neuropsycho_tests"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_neuropsycho_tests"
[1] "concordance_neuropsycho_tests"
[1] "clinical_misdiagnosis_neuroimaging"
[1] "sensitivity_neuroimaging"
[1] "specificity_neuroimaging"
[1] "admin_neuroimaging"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_neuroimaging = as.numeric(admin_neuroimaging)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_neuroimaging"
[1] "ICC_neuroimaging"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_neuroimaging"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_neuroimaging = as.numeric(cost_neuroimaging)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_neuroimaging"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_neuroimaging =
  as.numeric(concordance_neuroimaging)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_EEG"
[1] "sensitivity_EEG"
[1] "specificity_EEG"
[1] "admin_EEG"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_EEG = as.numeric(admin_EEG)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_EEG"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_EEG"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_EEG"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_EEG = as.numeric(cost_EEG)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_EEG"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_EEG = as.numeric(concordance_EEG)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_biomarker"
[1] "sensitivity_biomarker"
[1] "specificity_biomarker"
[1] "admin_biomarker"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_biomarker = as.numeric(admin_biomarker)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_biomarker"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_biomarker"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_biomarker"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_biomarker = as.numeric(cost_biomarker)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_biomarker"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_biomarker = as.numeric(concordance_biomarker)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "sensitivity_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "specificity_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "admin_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_observational"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_clinician_interview"
[1] "sensitivity_clinician_interview"
[1] "specificity_clinician_interview"
[1] "admin_clinician_interview"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_clinician_interview =
  as.numeric(admin_clinician_interview)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_clinician_interview"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_clinician_interview"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_clinician_interview"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_clinician_interview =
  as.numeric(cost_clinician_interview)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_clinician_interview"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_clinician_interview =
  as.numeric(concordance_clinician_interview)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "sensitivity_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "specificity_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "admin_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_clinician_tool"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "clinical_misdiagnosis_feigningADHD"
[1] "sensitivity_feigningADHD"
[1] "specificity_feigningADHD"
[1] "admin_feigningADHD"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `admin_feigningADHD = as.numeric(admin_feigningADHD)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "kappa_feigningADHD"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "ICC_feigningADHD"
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "cost_feigningADHD"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `cost_feigningADHD = as.numeric(cost_feigningADHD)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
[1] "concordance_feigningADHD"
Warning: There was 1 warning in `dplyr::mutate()`.
ℹ In argument: `concordance_feigningADHD =
  as.numeric(concordance_feigningADHD)`.
Caused by warning:
! NAs introduced by coercion
Warning in min(d_now[[test_outcome_and_result]]): no non-missing arguments to
min; returning Inf
Warning in max(d_now[[test_outcome_and_result]]): no non-missing arguments to
max; returning -Inf
# Stack the per-test summary tables into one data frame and export the
# Summary-of-Findings workbook.
sof_table <- dplyr::bind_rows(res)
writexl::write_xlsx(sof_table, "adhd_adults_diagnosis_SoF.xlsx")

Sensitivity and Specificity Meta-Analysis

# Per-study logit sensitivity: events are the true positives, trials are
# true positives + false negatives (a 0.5 continuity correction is applied
# where needed via add = 0.5).
dat_sens <- metafor::escalc(
  measure = "PLO",
  xi = true_positive_self_report,
  ni = true_positive_self_report + false_negative_self_report,
  data = d,
  add = 0.5
)
dat_sens[["outcome"]] <- "sens"

# Per-study logit specificity: events are the true negatives, trials are
# true negatives + false positives.
dat_spec <- metafor::escalc(
  measure = "PLO",
  xi = true_negative_self_report,
  ni = true_negative_self_report + false_positive_self_report,
  data = d,
  add = 0.5
)
dat_spec[["outcome"]] <- "spec"

# Stack both outcomes into long format and sort by study ID so each study's
# sensitivity and specificity rows sit together.  rbind()/order() are used
# (rather than bind_rows/arrange) to preserve the escalc attributes.
dat_long <- rbind(dat_sens, dat_spec)
dat_long <- dat_long[order(dat_long$ID), ]


# Bivariate random-effects model: one pooled mean per outcome (no
# intercept, so each coefficient is an outcome mean), with correlated
# sens/spec random effects per study under an unstructured covariance.
res <- metafor::rma.mv(
  yi, vi,
  mods = ~ 0 + outcome,       # same design matrix as ~ outcome - 1
  random = ~ outcome | ID,    # outcomes nested within study
  struct = "UN",              # estimate the sens/spec correlation freely
  data = dat_long
)
Warning: 186 rows with NAs omitted from model fitting.
# Print the fitted bivariate model (explicit print of the summary object).
print(summary(res))

Multivariate Meta-Analysis Model (k = 78; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-118.1308   236.2616   246.2616   257.9153   247.1188   

Variance Components:

outer factor: ID      (nlvls = 40)
inner factor: outcome (nlvls = 2)

            estim    sqrt  k.lvl  fixed  level 
tau^2.1    0.8204  0.9058     40     no   sens 
tau^2.2    1.2910  1.1362     38     no   spec 

      rho.sens  rho.spec    sens  spec 
sens         1                 -    38 
spec   -0.3058         1      no     - 

Test for Residual Heterogeneity:
QE(df = 76) = 1007.8301, p-val < .0001

Test of Moderators (coefficients 1:2):
QM(df = 2) = 146.7839, p-val < .0001

Model Results:

             estimate      se    zval    pval   ci.lb   ci.ub      
outcomesens    1.4085  0.1605  8.7740  <.0001  1.0939  1.7232  *** 
outcomespec    1.1356  0.1933  5.8762  <.0001  0.7568  1.5144  *** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Back-transform the two pooled logits to proportions; diag(2) selects each
# outcome coefficient in turn.
preds <- as.data.frame(
  predict(res, newmods = diag(2), transf = transf.ilogit, digits = 3)
)
rownames(preds) <- c("Sensitivity", "Specificity")
print(preds)
                 pred     ci.lb     ci.ub pi.lb pi.ub tau2.level
Sensitivity 0.8035361 0.7491152 0.8485390    NA    NA         NA
Specificity 0.7568735 0.6806666 0.8197112    NA    NA         NA
# Forest plot of per-study sensitivity, with the pooled estimate from the
# bivariate model added as a diamond below the studies.
dat_sub <- dat_long %>%
  dplyr::filter(outcome == "sens", !is.na(vi))  # drop studies with no usable estimate
k <- nrow(dat_sub)  # number of studies contributing a sensitivity estimate

metafor::forest(dat_sub$yi,
                vi = dat_sub$vi,
                slab = dat_sub$ID,
                transf = transf.ilogit,
                # ylim: bottom at -2 leaves room for the pooled diamond,
                # top at k + 3 leaves room for the headers
                ylim = c(-2, k + 3),
                rows = seq_len(k),  # safe when k == 0 (1:k would give c(1, 0))
                header = "Study",
                main = "Sensitivity")

# beta[1] is the "outcomesens" coefficient (coefficients are ordered
# alphabetically under the no-intercept parameterization).
metafor::addpoly(res$beta[1],  # namespace-qualified for consistency with forest()
                 sei = res$se[1],
                 row = -1,
                 transf = transf.ilogit,
                 mlab = "Pooled Sensitivity",
                 col = "red")

# Forest plot of per-study specificity, with the pooled estimate from the
# bivariate model added as a diamond below the studies.
dat_sub <- dat_long %>%
  dplyr::filter(outcome == "spec", !is.na(vi))  # drop studies with no usable estimate
k <- nrow(dat_sub)  # number of studies contributing a specificity estimate

metafor::forest(dat_sub$yi,
                vi = dat_sub$vi,
                slab = dat_sub$ID,
                transf = transf.ilogit,
                # ylim: bottom at -2 leaves room for the pooled diamond,
                # top at k + 3 leaves room for the headers
                ylim = c(-2, k + 3),
                rows = seq_len(k),  # safe when k == 0 (1:k would give c(1, 0))
                header = "Study",
                main = "Specificity")

# beta[2] is the "outcomespec" coefficient (coefficients are ordered
# alphabetically under the no-intercept parameterization).
metafor::addpoly(res$beta[2],  # namespace-qualified for consistency with forest()
                 sei = res$se[2],
                 row = -1,
                 transf = transf.ilogit,
                 mlab = "Pooled Specificity",
                 col = "red")

I need a meta-analysis of the diagnostic accuracy of the index tests included in the study. Most important are the self report questionnaires.

size

n_ADHD

true_positive_self_report

false_positive_self_report

false_negative_self_report

true_negative_self-report